From 6eeb1062160d864270a4bcaf08f381f3e5df8bfc Mon Sep 17 00:00:00 2001
From: "kaf24@freefall.cl.cam.ac.uk"
Date: Fri, 29 Oct 2004 10:11:00 +0000
Subject: [PATCH] bitkeeper revision 1.1159.1.303
 (41821734F7OlWCgL8OAaRtEc5i-iEA)

Scrub memory on reboot. Security paranoia.
---
 xen/arch/x86/domain.c   |  3 +++
 xen/common/kernel.c     |  2 ++
 xen/common/page_alloc.c | 34 +++++++++++++++++++++++++++++++++-
 xen/include/xen/mm.h    |  1 +
 4 files changed, 39 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index f635caac25..eaea07f51c 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -698,6 +698,9 @@ int construct_dom0(struct domain *p,
         return -EINVAL;
     }
 
+    /* Paranoia: scrub DOM0's memory allocation. */
+    memset((void *)alloc_start, 0, alloc_end - alloc_start);
+
     /* Construct a frame-allocation list for the initial domain. */
     for ( mfn = (alloc_start>>PAGE_SHIFT);
           mfn < (alloc_end>>PAGE_SHIFT);
diff --git a/xen/common/kernel.c b/xen/common/kernel.c
index c2ecc32c3e..79bb14324a 100644
--- a/xen/common/kernel.c
+++ b/xen/common/kernel.c
@@ -344,6 +344,8 @@ void cmain(multiboot_info_t *mbi)
     init_domheap_pages(__pa(frame_table) + frame_table_size,
                        dom0_memory_start);
 
+    scrub_heap_pages();
+
     init_trace_bufs();
 
     domain_unpause_by_systemcontroller(current);
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 72e13ec9d3..5792e4a4e5 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -37,6 +37,7 @@ extern char opt_badpage[];
  * One bit per page of memory. Bit set => page is allocated.
  */
 
+static unsigned long bitmap_size; /* in bytes */
 static unsigned long *alloc_bitmap;
 
 #define PAGES_PER_MAPWORD (sizeof(unsigned long) * 8)
@@ -139,7 +140,7 @@ unsigned long init_heap_allocator(
     unsigned long bitmap_start, unsigned long max_pages)
 {
     int i, j;
-    unsigned long bitmap_size, bad_pfn;
+    unsigned long bad_pfn;
     char *p;
 
     memset(avail, 0, sizeof(avail));
@@ -285,6 +286,37 @@ void free_heap_pages(int zone, struct pfn_info *pg, int order)
 }
 
 
+/*
+ * Scrub all unallocated pages in all heap zones. This function is more
+ * convoluted than appears necessary because we do not want to continuously
+ * hold the lock or disable interrupts while scrubbing very large memory areas.
+ */
+void scrub_heap_pages(void)
+{
+    void *p;
+    unsigned long pfn, flags;
+
+    for ( pfn = 0; pfn < (bitmap_size * 8); pfn++ )
+    {
+        /* Quick lock-free check. */
+        if ( allocated_in_map(pfn) )
+            continue;
+
+        spin_lock_irqsave(&heap_lock, flags);
+
+        /* Re-check page status with lock held. */
+        if ( !allocated_in_map(pfn) )
+        {
+            p = map_domain_mem(pfn << PAGE_SHIFT);
+            clear_page(p);
+            unmap_domain_mem(p);
+        }
+
+        spin_unlock_irqrestore(&heap_lock, flags);
+    }
+}
+
+
 /*************************
  * XEN-HEAP SUB-ALLOCATOR
  */
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 859db7c9f1..ac126568b2 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -11,6 +11,7 @@ unsigned long init_heap_allocator(
 void init_heap_pages(int zone, struct pfn_info *pg, unsigned long nr_pages);
 struct pfn_info *alloc_heap_pages(int zone, int order);
 void free_heap_pages(int zone, struct pfn_info *pg, int order);
+void scrub_heap_pages(void);
 
 /* Xen suballocator */
 void init_xenheap_pages(unsigned long ps, unsigned long pe);
-- 
2.30.2
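
For readers skimming the patch: scrub_heap_pages() deliberately takes and
drops heap_lock once per page, with interrupts disabled only inside that
window, because holding the lock across the whole pass would stall the
machine while all of free memory is zeroed. The lock-free
allocated_in_map() peek skips obviously-allocated pages without touching
the lock at all, and the re-check under the lock closes the race where a
page is allocated between the peek and the lock acquisition.

A minimal standalone sketch of the same peek/lock/re-check pattern
follows. It is illustrative only, not Xen code: pthread_mutex_t stands in
for the heap_lock spinlock (spin_lock_irqsave has no userspace
equivalent), and NR_PAGES, PAGE_SIZE, pages[] and the bitmap are made-up
stand-ins for Xen's frame table and heap. Build with, e.g.,
cc -pthread scrub_sketch.c.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define NR_PAGES      64
#define PAGE_SIZE     4096
#define BITS_PER_WORD (sizeof(unsigned long) * 8)

/* Fake allocation bitmap and "heap": one bit per page, bit set => allocated. */
static unsigned long   alloc_bitmap[NR_PAGES / BITS_PER_WORD + 1];
static unsigned char   pages[NR_PAGES][PAGE_SIZE];
static pthread_mutex_t heap_lock = PTHREAD_MUTEX_INITIALIZER;

static int allocated_in_map(unsigned long pfn)
{
    return (alloc_bitmap[pfn / BITS_PER_WORD] >> (pfn % BITS_PER_WORD)) & 1;
}

static void scrub_heap_pages(void)
{
    unsigned long pfn;

    for ( pfn = 0; pfn < NR_PAGES; pfn++ )
    {
        /* Quick lock-free check: skip pages that already look allocated. */
        if ( allocated_in_map(pfn) )
            continue;

        /* Take the lock only for pages that looked free... */
        pthread_mutex_lock(&heap_lock);

        /* ...and re-check under the lock, in case the page was allocated
         * between the peek above and the lock acquisition. */
        if ( !allocated_in_map(pfn) )
            memset(pages[pfn], 0, PAGE_SIZE);

        pthread_mutex_unlock(&heap_lock);
    }
}

int main(void)
{
    /* Mark two pages allocated so the scrubber skips them. */
    alloc_bitmap[0] |= (1UL << 3) | (1UL << 7);
    scrub_heap_pages();
    printf("scrubbed all free pages in the %d-page fake heap\n", NR_PAGES);
    return 0;
}

The per-page locking bounds the worst-case lock hold time to a single
page clear regardless of how much memory is being scrubbed, which is
exactly why the original comment calls the function "more convoluted than
appears necessary".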